{
unsigned long max_low_pfn;
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+#if 0
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_writeable_pagetables);
+#endif
+
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
early_cpu_init();
return ret;
}
+/*
+ * Guest-side stub for the vm_assist hypercall.
+ * cmd:  VMASST_CMD_enable or VMASST_CMD_disable.
+ * type: VMASST_TYPE_* bit to toggle in the domain's vm_assist mask.
+ * Returns the hypervisor's result code (0 on success).
+ * NOTE(review): per the i386 Xen ABI the hypercall number is passed in
+ * %eax ("0" ties it to ret's register) with arguments in %ebx/%ecx;
+ * TRAP_INSTR emits the trap instruction — confirm against hypervisor.h.
+ */
+static inline int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
+{
+ int ret;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret) : "0" (__HYPERVISOR_vm_assist),
+ "b" (cmd), "c" (type) : "memory" );
+
+ return ret;
+}
+
#endif /* __HYPERVISOR_H__ */
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long off, addr, fixup;
- struct domain *p = current;
+ struct domain *d = current;
extern int map_ldt_shadow_page(unsigned int);
- int cpu = smp_processor_id();
+ int cpu = d->processor;
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
perfc_incrc(page_faults);
if ( unlikely(addr >= LDT_VIRT_START) &&
- (addr < (LDT_VIRT_START + (p->mm.ldt_ents*LDT_ENTRY_SIZE))) )
+ (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
{
/*
* Copy a mapping from the guest's LDT, if it is valid. Otherwise we
* send the fault up to the guest OS to be handled.
*/
off = addr - LDT_VIRT_START;
- addr = p->mm.ldt_base + off;
+ addr = d->mm.ldt_base + off;
if ( likely(map_ldt_shadow_page(off >> PAGE_SHIFT)) )
return; /* successfully copied the mapping */
}
- if ((addr >> L2_PAGETABLE_SHIFT) == ptwr_info[cpu].disconnected) {
+ if ( (addr >> L2_PAGETABLE_SHIFT) == ptwr_info[cpu].disconnected )
+ {
ptwr_reconnect_disconnected(addr);
return;
}
- if (addr < PAGE_OFFSET && error_code & 2 && ptwr_do_page_fault(addr))
+ if ( VM_ASSIST(d, VMASST_TYPE_writeable_pagetables) &&
+ (addr < PAGE_OFFSET) &&
+ ((error_code & 3) == 3) && /* write-protection fault */
+ ptwr_do_page_fault(addr) )
return;
- if ( unlikely(p->mm.shadow_mode) &&
+ if ( unlikely(d->mm.shadow_mode) &&
(addr < PAGE_OFFSET) && shadow_fault(addr, error_code) )
return; /* Returns TRUE if fault was handled. */
if ( unlikely(!(regs->xcs & 3)) )
goto xen_fault;
- ti = p->thread.traps + 14;
+ ti = d->thread.traps + 14;
gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
gtb->cr2 = addr;
gtb->error_code = error_code;
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return;
xen_fault:
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
perfc_incrc(copy_user_faults);
- if ( !p->mm.shadow_mode )
+ if ( !d->mm.shadow_mode )
DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
regs->eip = fixup;
regs->xds = regs->xes = regs->xfs = regs->xgs = __HYPERVISOR_DS;
asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
{
- struct domain *p = current;
+ struct domain *d = current;
struct guest_trap_bounce *gtb = guest_trap_bounce+smp_processor_id();
trap_info_t *ti;
unsigned long fixup;
}
#if defined(__i386__)
- if ( (error_code == 0) && gpf_emulate_4gb(regs) )
+ if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) &&
+ (error_code == 0) &&
+ gpf_emulate_4gb(regs) )
return;
#endif
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- p->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return;
gp_in_kernel:
goto undecodeable;
}
-#if 0
- {
- char str1[] = { 0x65,0x8b,0x00,0x8b,0x30 };
- char str2[] = { 0x65,0x8b,0x02,0x8b,0x40,0x0c };
- char str3[] = { 0x65,0x8b,0x30,0x85,0xf6 };
- char str4[] = { 0x65,0x8b,0x00,0x5d,0x8b,0x00 };
- char str5[] = { 0x65,0x89,0x30,0x8b,0x45,0x08 };
- char str6[] = { 0x65,0x8b,0x00,0x8b,0x50,0x0c };
- char str7[] = { 0x65,0x89,0x51,0x00,0x83,0xc8,0xff };
- if ( (memcmp(eip,str1,5) == 0) ||
- (memcmp(eip,str2,6) == 0) ) goto out;
- if ( (memcmp(eip,str3,5) == 0) ||
- (memcmp(eip,str4,6) == 0) ) goto out;
- if ( (memcmp(eip,str5,6) == 0) ||
- (memcmp(eip,str6,6) == 0) ) goto out;
- if ( (memcmp(eip,str7,7) == 0) ||
- (memcmp(eip,str7,7) == 0) ) goto out;
- }
- printk(" .byte 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x\n",
- eip[-8],eip[-7],eip[-6],eip[-5],eip[-4],eip[-3],eip[-2],eip[-1]);
- printk(" .byte 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x\n",
- eip[0],eip[1],eip[2],eip[3],eip[4],eip[5],eip[6],eip[7]);
- printk(" @ %04x:%08lx\n", regs->xcs, regs->eip);
-#endif
-
/* Success! */
perfc_incrc(emulations);
regs->eip += pb - eip;
.long SYMBOL_NAME(do_console_io)
.long SYMBOL_NAME(do_physdev_op)
.long SYMBOL_NAME(do_update_va_mapping_otherdomain) /* 20 */
+ .long SYMBOL_NAME(do_vm_assist)
.rept NR_hypercalls-((.-hypercall_table)/4)
.long SYMBOL_NAME(do_ni_hypercall)
.endr
return (XEN_VERSION<<16) | (XEN_SUBVERSION);
}
+/*
+ * vm_assist hypercall handler: enable/disable per-domain assistance bits.
+ * cmd:  VMASST_CMD_enable or VMASST_CMD_disable.
+ * type: VMASST_TYPE_* bit index into current->vm_assist.
+ * Returns 0 on success, -EINVAL for an out-of-range type,
+ * -ENOSYS for an unrecognised command.
+ */
+long do_vm_assist(unsigned int cmd, unsigned int type)
+{
+ /* Valid bit indices are 0 .. BITS_PER_LONG-1: reject type == width too. */
+ if ( type >= (sizeof(unsigned long) * 8) )
+ return -EINVAL;
+
+ switch ( cmd )
+ {
+ case VMASST_CMD_enable:
+ set_bit(type, &current->vm_assist);
+ return 0;
+ case VMASST_CMD_disable:
+ clear_bit(type, &current->vm_assist);
+ return 0;
+ }
+
+ return -ENOSYS;
+}
+
long do_ni_hypercall(void)
{
/* No-op hypercall. */
#define __HYPERVISOR_console_io 18
#define __HYPERVISOR_physdev_op 19
#define __HYPERVISOR_update_va_mapping_otherdomain 20
+#define __HYPERVISOR_vm_assist 21
/*
* MULTICALLS
#define MEMOP_increase_reservation 0
#define MEMOP_decrease_reservation 1
+/*
+ * Commands to HYPERVISOR_vm_assist().
+ */
+#define VMASST_CMD_enable 0
+#define VMASST_CMD_disable 1
+#define VMASST_TYPE_4gb_segments 0
+#define VMASST_TYPE_writeable_pagetables 1
+
#ifndef __ASSEMBLY__
typedef u32 domid_t;
unsigned long *io_bitmap; /* Pointer to task's IO bitmap or NULL */
unsigned long flags;
+ unsigned long vm_assist;
atomic_t refcnt;
atomic_t pausecnt;
#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))
+#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
+
#include <xen/slab.h>
#include <asm/domain.h>